void init_fpu(void)
{
- __asm__("fninit");
- if ( cpu_has_xmm ) load_mxcsr(0x1f80);
+ __asm__ __volatile__ ( "fninit" );
+ if ( cpu_has_xmm )
+ load_mxcsr(0x1f80);
set_bit(EDF_DONEFPUINIT, &current->ed_flags);
}
-static inline void __save_init_fpu( struct exec_domain *tsk )
-{
- if ( cpu_has_fxsr ) {
- asm volatile( "fxsave %0 ; fnclex"
- : "=m" (tsk->arch.i387) );
- } else {
- asm volatile( "fnsave %0 ; fwait"
- : "=m" (tsk->arch.i387) );
- }
- clear_bit(EDF_USEDFPU, &tsk->ed_flags);
-}
-
-void save_init_fpu( struct exec_domain *tsk )
+void save_init_fpu(struct exec_domain *tsk)
{
/*
* The guest OS may have set the 'virtual STTS' flag.
* This causes us to set the real flag, so we'll need
* to temporarily clear it while saving f-p state.
*/
- if ( test_bit(EDF_GUEST_STTS, &tsk->ed_flags) ) clts();
- __save_init_fpu(tsk);
+ if ( test_bit(EDF_GUEST_STTS, &tsk->ed_flags) )
+ clts();
+
+ if ( cpu_has_fxsr )
+ __asm__ __volatile__ (
+ "fxsave %0 ; fnclex"
+ : "=m" (tsk->arch.i387) );
+ else
+ __asm__ __volatile__ (
+ "fnsave %0 ; fwait"
+ : "=m" (tsk->arch.i387) );
+
+ clear_bit(EDF_USEDFPU, &tsk->ed_flags);
stts();
}
-void restore_fpu( struct exec_domain *tsk )
+void restore_fpu(struct exec_domain *tsk)
{
- if ( cpu_has_fxsr ) {
- asm volatile( "fxrstor %0"
- : : "m" (tsk->arch.i387) );
- } else {
- asm volatile( "frstor %0"
- : : "m" (tsk->arch.i387) );
- }
+ if ( cpu_has_fxsr )
+ __asm__ __volatile__ (
+ "fxrstor %0"
+ : : "m" (tsk->arch.i387) );
+ else
+ __asm__ __volatile__ (
+ "frstor %0"
+ : : "m" (tsk->arch.i387) );
}
switch ( opcode )
{
case 0x06: /* CLTS */
- (void)do_fpu_taskswitch();
+ clear_bit(EDF_GUEST_STTS, &ed->ed_flags);
+ if ( test_bit(EDF_USEDFPU, &ed->ed_flags) )
+ clts();
break;
case 0x09: /* WBINVD */
switch ( (opcode >> 3) & 7 )
{
case 0: /* Write CR0 */
- if ( *reg & X86_CR0_TS ) /* XXX ignore all but TS bit */
- (void)do_fpu_taskswitch;
+ if ( *reg & X86_CR0_TS )
+ (void)do_fpu_taskswitch();
break;
case 2: /* Write CR2 */
#include <asm/processor.h>
extern void init_fpu(void);
-extern void save_init_fpu( struct exec_domain *tsk );
-extern void restore_fpu( struct exec_domain *tsk );
+extern void save_init_fpu(struct exec_domain *tsk);
+extern void restore_fpu(struct exec_domain *tsk);
-#define unlazy_fpu( tsk ) do { \
- if ( test_bit(EDF_USEDFPU, &tsk->ed_flags) ) \
- save_init_fpu( tsk ); \
-} while (0)
-
-#define clear_fpu( tsk ) do { \
- if ( test_and_clear_bit(EDF_USEDFPU, &tsk->ed_flags) ) { \
- asm volatile("fwait"); \
- stts(); \
- } \
-} while (0)
+#define unlazy_fpu(_tsk) do { \
+ if ( test_bit(EDF_USEDFPU, &(_tsk)->ed_flags) ) \
+ save_init_fpu(_tsk); \
+} while ( 0 )
#define load_mxcsr( val ) do { \
- unsigned long __mxcsr = ((unsigned long)(val) & 0xffbf); \
- asm volatile( "ldmxcsr %0" : : "m" (__mxcsr) ); \
-} while (0)
+ unsigned long __mxcsr = ((unsigned long)(val) & 0xffbf); \
+ __asm__ __volatile__ ( "ldmxcsr %0" : : "m" (__mxcsr) ); \
+} while ( 0 )
#endif /* __ASM_I386_I387_H */